import face_recognition
from PIL import Image, ImageDraw, ImageShow
import cv2
from IPython.display import display
import numpy as np
from matplotlib import pyplot as plt
import scipy
from scipy.spatial import distance
from scipy import signal
import hashlib
from dtaidistance import dtw
from dtaidistance import dtw_visualisation as dtwvis
# %matplotlib inline  -- IPython notebook magic; commented out so this file
# parses as plain Python. Re-enable it when running in a notebook.
# File-name setup: the two input photos plus derived names for the
# cropped-face images and the landmark-annotated images.
image1_name = "SAM_ID.jpg"
image2_name = "SAM5.jpg"
image1_f = f"face_{image1_name}"
image2_f = f"face_{image2_name}"
image1_fl = f"face_LandMark_{image1_name}"
image2_fl = f"face_LandMark_{image2_name}"
# Load the first image (the ID), crop out the first detected face, and
# compute the 128-d encoding from the cropped face.
image1 = face_recognition.load_image_file(image1_name)
pil_image = Image.fromarray(image1)
display(pil_image)
# Locate the face on the ID; face_locations returns (top, right, bottom,
# left) boxes in pixel coordinates.
face_locations = face_recognition.face_locations(image1)
face_location = face_locations[0]  # assumes at least one face was detected
top, right, bottom, left = face_location
face_image1 = image1[top:bottom, left:right]
# Encode only the cropped face. (The original code also ran face_encodings
# on the full image first, but that result was immediately overwritten by
# this one, so the redundant — and expensive — call has been dropped.)
image1_encoding = face_recognition.face_encodings(face_image1)[0]
pil_image = Image.fromarray(face_image1)
#display(pil_image)
pil_image.save(image1_f)
# Re-load the saved cropped ID face, detect its facial landmarks, and trace
# every feature onto a PIL copy of the image.
image = face_recognition.load_image_file(image1_f)
face_landmarks_list = face_recognition.face_landmarks(image)
# Drawing surface over the face crop.
img = Image.fromarray(image, 'RGB')
img_draw = ImageDraw.Draw(img)
# Take the first detected face and trace each named feature as a polyline,
# in the same order the features were originally drawn.
face_landmarks = face_landmarks_list[0]
for feature in ('chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
                'nose_tip', 'left_eye', 'right_eye', 'top_lip',
                'bottom_lip'):
    img_draw.line(face_landmarks[feature])
img.save(image1_fl)
img  # bare expression: shows the annotated face in a notebook cell
# Load the second image (the person photo to compare), crop out the first
# detected face, and compute the 128-d encoding from the cropped face.
image2 = face_recognition.load_image_file(image2_name)
pil_image = Image.fromarray(image2)
display(pil_image)
# Locate the face on the photo; face_locations returns (top, right, bottom,
# left) boxes in pixel coordinates.
face_locations = face_recognition.face_locations(image2)
face_location = face_locations[0]  # assumes at least one face was detected
top, right, bottom, left = face_location
face_image2 = image2[top:bottom, left:right]
# Encode only the cropped face. (The original code also ran face_encodings
# on the full image first, but that result was immediately overwritten by
# this one, so the redundant — and expensive — call has been dropped.)
image2_encoding = face_recognition.face_encodings(face_image2)[0]
pil_image = Image.fromarray(face_image2)
#display(pil_image)
pil_image.save(image2_f)
# Re-load the saved cropped photo face, detect its facial landmarks, and
# trace every feature onto a PIL copy of the image.
image = face_recognition.load_image_file(image2_f)
face_landmarks_list = face_recognition.face_landmarks(image)
# Drawing surface over the face crop.
img = Image.fromarray(image, 'RGB')
img_draw = ImageDraw.Draw(img)
# Take the first detected face and trace each named feature as a polyline,
# in the same order the features were originally drawn.
face_landmarks = face_landmarks_list[0]
for feature in ('chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
                'nose_tip', 'left_eye', 'right_eye', 'top_lip',
                'bottom_lip'):
    img_draw.line(face_landmarks[feature])
img.save(image2_fl)
img  # bare expression: shows the annotated face in a notebook cell
# Compare the photo encoding against the ID encoding. compare_faces returns
# a list of booleans (one per known encoding): True when the face distance
# is below the library's default 0.6 threshold.
result = face_recognition.compare_faces([image1_encoding], image2_encoding)
print("Match is ", result)
# Output: Match is [True]
# Calculate the match distance between the two encodings: smaller is a
# better match; the library's match threshold is < 0.6.
face_distances = face_recognition.face_distance([image1_encoding],
                                                image2_encoding)
id_photo_distance = face_distances[0]
print("Distance is ", id_photo_distance)
# Output: Distance is 0.5389912936019888
# Review the encoded image array properties: the 128-d face encoding of the
# ID image. (Bare expression — displays only in a notebook cell; it is a
# no-op when run as a plain script.)
image1_encoding
# Output: 128-d encoding of image1 (notebook cell result, commented out):
# array([-0.02186848, -0.01965787, 0.00259056, -0.02478239, -0.01664969,
# -0.10610676, 0.01094741, -0.10516758, 0.1641904 , -0.05527563,
# 0.04666785, 0.03058797, -0.21281168, -0.05096135, -0.03531396,
# 0.01876267, -0.1471884 , -0.17623644, -0.07147589, -0.13817489,
# 0.02247795, -0.01721558, 0.00287753, -0.03523913, -0.13280749,
# -0.26787668, -0.11253233, -0.08611919, 0.11226708, -0.07205713,
# 0.04050874, 0.04379646, -0.23793121, -0.0674388 , 0.01317088,
# 0.06752713, 0.0316345 , -0.01287185, 0.21310566, 0.09798969,
# -0.16457979, 0.05986524, 0.02185974, 0.31062165, 0.16524121,
# 0.03121643, 0.02749716, -0.08912773, 0.12348939, -0.15608677,
# 0.08625095, 0.19935334, 0.17486231, 0.03266001, 0.09862867,
# -0.07852125, 0.08339813, 0.05880124, -0.25655615, 0.09623435,
# 0.14796887, -0.0164279 , -0.03338929, -0.00348215, 0.07984443,
# -0.01001007, -0.04321972, -0.10640779, 0.18091317, -0.23859385,
# -0.01918118, 0.05449646, -0.05081759, -0.1334236 , -0.27976078,
# 0.00121856, 0.34986931, 0.14645594, -0.09595425, 0.09302779,
# -0.09329277, -0.05850637, 0.10993777, 0.02037486, -0.10622911,
# 0.06702861, -0.04867886, 0.08697376, 0.17849031, 0.04487002,
# -0.01593085, 0.11293548, 0.03791223, -0.03949156, 0.01875556,
# 0.03749731, -0.08859247, 0.00363185, -0.10919598, 0.0157833 ,
# 0.02225298, -0.10997678, 0.03893253, 0.07322352, -0.22097987,
# 0.13529074, 0.02282795, -0.10523402, -0.0472881 , 0.00461207,
# -0.07175812, -0.02093117, 0.11860759, -0.31445962, 0.17815524,
# 0.23723271, -0.0251813 , 0.06468315, -0.0440918 , 0.01249702,
# -0.02351607, -0.04970423, -0.14459148, -0.11708979, -0.07364641,
# -0.01232601, 0.03116286, 0.04206271])
#plt.figure(figsize=(20,10))
#plt.plot(image1_encoding)
#plt.show()
# Inspect the 128-d face encoding of the photo image (bare expression —
# displays only in a notebook cell).
image2_encoding
# Output: 128-d encoding of image2 (notebook cell result, commented out):
# array([-0.07551383, 0.08232871, 0.01339892, -0.05407017, -0.04303373,
# -0.02614599, -0.01839652, -0.04095102, 0.16063437, 0.01018693,
# 0.14140368, -0.02759282, -0.26631302, -0.08233918, 0.02208034,
# 0.03428954, -0.22120175, -0.17079294, -0.02433493, -0.05973022,
# 0.05689525, -0.02454692, 0.0062798 , 0.01991949, -0.18420176,
# -0.37112671, -0.07942626, -0.16381201, 0.12649101, -0.17628941,
# 0.06931719, 0.05553307, -0.16552632, -0.04630527, 0.00108975,
# 0.14467177, -0.02062003, -0.06426345, 0.21003476, 0.01263061,
# -0.17689484, -0.01729994, -0.04414195, 0.33569509, 0.16239862,
# 0.02254995, 0.04208624, -0.03692794, 0.14499682, -0.22603855,
# 0.06833099, 0.23409024, 0.10641924, 0.05426788, 0.1422513 ,
# -0.07233252, 0.08305469, 0.0683917 , -0.30100444, 0.04065674,
# 0.07731198, 0.0034715 , 0.00200938, -0.03578103, 0.14419319,
# -0.01067216, -0.11836021, -0.09787194, 0.16235006, -0.16451362,
# -0.06814115, 0.09565461, -0.0922716 , -0.16293561, -0.32310027,
# 0.02707338, 0.34568352, 0.09492575, -0.14607486, 0.08619006,
# -0.06976053, -0.09157435, 0.04324291, 0.05139826, -0.10698467,
# 0.06197815, -0.1188253 , 0.08141133, 0.13965198, 0.01005168,
# -0.02763838, 0.12622152, -0.00140268, -0.00343214, 0.06285081,
# 0.12890202, -0.11133862, -0.00999878, -0.10857394, -0.00986843,
# 0.15157364, -0.0918166 , -0.0457947 , 0.12101953, -0.18915989,
# 0.17897312, 0.03275399, -0.06551629, -0.04203849, -0.05206411,
# -0.12930499, 0.00281858, 0.1891275 , -0.37278903, 0.18036717,
# 0.22302727, -0.02749088, 0.06248052, 0.00267291, -0.04138476,
# -0.03864187, -0.06949662, -0.18342799, -0.1382897 , -0.01634644,
# 0.01511095, -0.0344664 , 0.00262818])
#plt.figure(figsize=(20,10))
#plt.plot(image2_encoding)
#plt.show()
# Compare the two encoded image arrays visually: overlay both 128-d
# encodings as "signals/waves" on a single figure.
plt.figure(figsize=(20,10))
for encoding in (image1_encoding, image2_encoding):
    plt.plot(encoding)
plt.show()
# L2 (Euclidean) distance between the two encodings, computed directly
# with NumPy.
encoding_diff = image1_encoding - image2_encoding
dist = np.linalg.norm(encoding_diff)
print(dist)
# Output: 0.5389912936019888
# Euclidean distance between the two encodings via scipy.spatial.distance —
# same value as the np.linalg.norm computation above.
dist = distance.euclidean(image1_encoding, image2_encoding)
print(dist)
# Output: 0.5389912936019888
# DTW - Dynamic Time Warping: align the two encoding "signals" and save a
# visualisation of the warping path to DTW.png.
path = dtw.warping_path(image1_encoding, image2_encoding)
dtwvis.plot_warping(image1_encoding, image2_encoding, path, filename="DTW.png")
# Output: (None, None)
# Display the saved DTW warping plot (bare `img` shows it in a notebook
# cell; it is a no-op when run as a plain script).
img = Image.open("DTW.png")
img
# DTW distance between the two encodings (smaller = more similar).
# Renamed from `distance`: that name shadowed the scipy.spatial.distance
# module imported at the top of the file, which would break any later call
# such as distance.euclidean(...).
dtw_dist = dtw.distance(image1_encoding, image2_encoding)
print(dtw_dist)
# Output: 0.5244728760775108
# Signal cross-correlation "accuracy" ... Threshold > 90%
# np.corrcoef returns the 2x2 correlation matrix [[1, r], [r, 1]].
accuracy = np.corrcoef(image1_encoding, image2_encoding)
print (accuracy)
# Square elementwise -> [[1, r^2], [r^2, 1]].
accuracy_s = np.square(accuracy)
print (accuracy_s)
# Mean of the squared matrix, expressed as a percentage.
# NOTE(review): the two diagonal 1s are included in the average, so this
# computes (2 + 2*r^2)/4 * 100, which inflates the score toward 50%+;
# plain r^2 * 100 may have been intended — confirm before relying on the
# 90% threshold.
accuracy_percent = np.sum(accuracy_s) / 4.0 * 100.0
print(accuracy_percent)
# Output: [[1. 0.92705406] [0.92705406 1. ]] [[1. 0.85942923] [0.85942923 1. ]] 92.97146145089299
# Full cross-correlation of the two encoding "signals", then plot it
# (uses the `signal` module alias already imported at the top of the file).
image_coor = signal.correlate(image1_encoding, image2_encoding)
plt.plot(image_coor)
plt.show()